%pylab inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import preprocess_input
from keras.applications.vgg19 import decode_predictions
from keras.applications.vgg19 import VGG19
def show_img(img_path):
    """Read the image file at *img_path* and display it inline."""
    plt.imshow(mpimg.imread(img_path))
    plt.show()
def vgg16_pred(image_path):
    """Classify one image with the stock ImageNet VGG16 and print the top label.

    FIX: the top-of-file imports pull `preprocess_input` / `decode_predictions`
    from vgg16 and then immediately shadow them with the vgg19 versions
    (L6-L10 of the original), so this function was silently calling the VGG19
    helpers. They happen to be equivalent in current Keras, but the shadowing
    is fragile — import the VGG16 versions locally under aliases to make the
    intent explicit and robust.
    """
    from keras.applications.vgg16 import preprocess_input as vgg16_preprocess
    from keras.applications.vgg16 import decode_predictions as vgg16_decode
    # load the full ImageNet-pretrained model (1000-class head included)
    model = VGG16()
    # load the image at the network's expected 224x224 input size
    image = load_img(image_path, target_size=(224, 224))
    # convert the image pixels to a numpy array
    image = img_to_array(image)
    # add the leading batch dimension: (1, 224, 224, 3)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # apply VGG16's expected input preprocessing
    image = vgg16_preprocess(image)
    # probabilities across all 1000 ImageNet classes
    yhat = model.predict(image)
    # map probabilities to (class_id, class_name, probability) tuples
    label = vgg16_decode(yhat)
    # retrieve the most likely result (highest probability)
    label = label[0][0]
    # print the classification
    print('vgg16 prediction: %s (%.2f%%)' % (label[1], label[2]*100))
def vgg19_pred(image_path):
    """Run the stock ImageNet VGG19 on one image and print its best guess."""
    # full pretrained network, classifier head included
    net = VGG19()
    # image file -> float array at the expected 224x224 size
    pixels = img_to_array(load_img(image_path, target_size=(224, 224)))
    # prepend the batch axis and apply VGG preprocessing
    batch = preprocess_input(pixels.reshape((1,) + pixels.shape))
    # top prediction is a (class_id, class_name, probability) tuple
    best = decode_predictions(net.predict(batch))[0][0]
    print('vgg19 prediction: %s (%.2f%%)' % (best[1], best[2]*100))
# Show each sample image, then run both stock models on it.
sample_images = [
    'images/balls/b3.jpg',
    'images/horses/h4.jpg',
    'images/birds/bird1.jpg',
    'images/houses/ho17.jpg',
    'images/spiders/spider8.jpg',
    'images/snakes/snake23.jpg',
    'images/cars/c1.jpg',
    'images/women/women4.jpg',
    'images/men/men15.jpg',
]
for sample in sample_images:
    show_img(sample)
    vgg16_pred(sample)
    vgg19_pred(sample)
from os import listdir
from os.path import isfile, join
import random

# Pick a random image from the test images.
test_dir = 'images/test-images'
files = [f for f in listdir(test_dir) if isfile(join(test_dir, f))]
# FIX: the original used `random.randint(0, len(files))`. With the stdlib
# `random` module, randint is inclusive on both ends, so this can index one
# past the end of the list (IndexError). (It only "worked" if %pylab had
# bound numpy's `random`, whose randint excludes the high bound.) The
# explicit stdlib import plus random.choice removes the ambiguity and the
# off-by-one entirely.
filepath = test_dir + '/' + random.choice(files)
show_img(filepath)
vgg16_pred(filepath)
vgg19_pred(filepath)
# The code below adapts VGG16 and VGG19 by retraining their last four
# layers so that they classify images into the nine provided classes.
# Importing all necessary libraries
from keras.preprocessing.image import ImageDataGenerator
from keras.applications import VGG16
from keras.applications import VGG19
import matplotlib.pyplot as plt
from keras import models
from keras import layers
from keras import optimizers
#Load the VGG models
# Both bases are pretrained on ImageNet. include_top=False drops the original
# 1000-class classifier head so a custom 9-class head can be attached below;
# input_shape pins the expected image size to 224x224 RGB.
vgg16_conv = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
vgg19_conv = VGG19(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
# Freeze every layer of the VGG16 base except the last four, so only the
# deepest convolutional block is fine-tuned.
for frozen in vgg16_conv.layers[:-4]:
    frozen.trainable = False
# Report the trainable flag of each layer as a sanity check.
for lyr in vgg16_conv.layers:
    print(lyr, lyr.trainable)

# Stack a fresh 9-way classifier head on top of the convolutional base.
model16 = models.Sequential()
model16.add(vgg16_conv)
model16.add(layers.Flatten())
model16.add(layers.Dense(1024, activation='relu'))
model16.add(layers.Dropout(0.5))
model16.add(layers.Dense(9, activation='softmax'))
# The summary shows how many parameters remain trainable.
model16.summary()
# Training images get augmentation; validation images are only rescaled.
train_datagen = ImageDataGenerator(rescale=1./255,
                                   rotation_range=20,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1./255)

# Change the batch sizes according to your system RAM.
train_batchsize = 100
val_batchsize = 10

train_generator = train_datagen.flow_from_directory('images/train',
                                                    target_size=(224, 224),
                                                    batch_size=train_batchsize,
                                                    class_mode='categorical')
# shuffle=False keeps the validation order stable, which matters for the
# per-file error inspection done later in this script.
validation_generator = validation_datagen.flow_from_directory('images/valid',
                                                              target_size=(224, 224),
                                                              batch_size=val_batchsize,
                                                              class_mode='categorical',
                                                              shuffle=False)
# Compile and fine-tune the VGG16-based model.
model16.compile(loss='categorical_crossentropy',
                optimizer=optimizers.RMSprop(lr=1e-4),
                metrics=['acc'])
history16 = model16.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples/train_generator.batch_size,
    epochs=10,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples/validation_generator.batch_size,
    verbose=1)
# Persist the fine-tuned weights.
model16.save('small_last4.h5')

# Plot the training/validation accuracy and loss curves.
acc, val_acc = history16.history['acc'], history16.history['val_acc']
loss, val_loss = history16.history['loss'], history16.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
def vgg16_edited_pred(model, image_path):
    """Classify *image_path* with the fine-tuned 9-class model and print the label.

    Relies on the module-level `idx2label` mapping (class index -> class name)
    that is built from the validation generator later in this script.

    NOTE(review): training rescales inputs by 1/255 via ImageDataGenerator,
    while this uses `preprocess_input` (mean subtraction) — the two
    preprocessings differ; confirm which one the trained model expects.
    """
    # load an image from file at the network's input size
    image = load_img(image_path, target_size=(224, 224))
    # convert the image pixels to a numpy array
    image = img_to_array(image)
    # add the leading batch dimension
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # prepare the image for the VGG model
    image = preprocess_input(image)
    # probabilities over the 9 classes
    yhat = model.predict(image)
    # FIX: the original assigned `predicted_class` but then read the undefined
    # name `pred_class` (NameError). Also convert the argmax result to a plain
    # int so the idx2label dict lookup is keyed correctly (argmax with axis=1
    # returns a 1-element array, which is not a valid dict key).
    pred_class = int(np.argmax(yhat, axis=1)[0])
    pred_label = idx2label[pred_class]
    # print the classification
    print('vgg16 edited prediction: {}'.format(pred_label))
# Freeze every layer of the VGG19 base except the last four.
for frozen in vgg19_conv.layers[:-4]:
    frozen.trainable = False
# Report the trainable flag of each layer as a sanity check.
for lyr in vgg19_conv.layers:
    print(lyr, lyr.trainable)

# Stack a fresh 9-way classifier head on top of the VGG19 base.
model19 = models.Sequential()
model19.add(vgg19_conv)
model19.add(layers.Flatten())
model19.add(layers.Dense(1024, activation='relu'))
model19.add(layers.Dropout(0.5))
model19.add(layers.Dense(9, activation='softmax'))
# The summary shows how many parameters remain trainable.
model19.summary()
# Compile and fine-tune the VGG19-based model.
model19.compile(loss='categorical_crossentropy',
                optimizer=optimizers.RMSprop(lr=1e-4),
                metrics=['acc'])
history19 = model19.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples/train_generator.batch_size,
    epochs=10,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples/validation_generator.batch_size,
    verbose=1)
# FIX: the original saved to 'small_last4.h5', silently overwriting the VGG16
# model saved earlier in this script — use a distinct filename.
model19.save('small_last4_vgg19.h5')

# Plot the training/validation accuracy and loss curves.
acc = history19.history['acc']
val_acc = history19.history['val_acc']
loss = history19.history['loss']
val_loss = history19.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
print('VGG16 edited model')
# Rebuild the validation generator; shuffle=False keeps filenames, ground
# truth and predictions aligned by index.
validation_generator = validation_datagen.flow_from_directory(
    'images/valid',
    target_size=(224, 224),
    batch_size=val_batchsize,
    class_mode='categorical',
    shuffle=False)
# Filenames and ground-truth class indices, in generator order.
fnames = validation_generator.filenames
ground_truth = validation_generator.classes
# label name -> class index, and the inverted index -> name mapping.
label2index = validation_generator.class_indices
idx2label = {idx: name for name, idx in label2index.items()}
# Predict over the whole validation set with the fine-tuned VGG16 model.
predictions = model16.predict_generator(
    validation_generator,
    steps=validation_generator.samples/validation_generator.batch_size,
    verbose=1)
predicted_classes = np.argmax(predictions, axis=1)
# Indices of the misclassified images.
errors = np.where(predicted_classes != ground_truth)[0]
print("No of errors = {}/{}".format(len(errors), validation_generator.samples))
# Display each misclassified image with its predicted label and confidence.
for i in range(len(errors)):
    pred_class = np.argmax(predictions[errors[i]])
    pred_label = idx2label[pred_class]
    title = 'Original label:{}, Prediction :{}, confidence : {:.3f}'.format(
        fnames[errors[i]].split('/')[0],
        pred_label,
        predictions[errors[i]][pred_class])
    original = load_img('{}/{}'.format('images/valid', fnames[errors[i]]))
    plt.figure(figsize=[7, 7])
    plt.axis('off')
    plt.title(title)
    plt.imshow(original)
    plt.show()
# FIX: this section evaluates the VGG19-based model but the original banner
# said 'VGG16 edited model' — corrected to VGG19.
print('VGG19 edited model')
# Rebuild the validation generator; shuffle=False keeps filenames, ground
# truth and predictions aligned by index.
validation_generator = validation_datagen.flow_from_directory(
    'images/valid',
    target_size=(224, 224),
    batch_size=val_batchsize,
    class_mode='categorical',
    shuffle=False)
# Filenames and ground-truth class indices, in generator order.
fnames = validation_generator.filenames
ground_truth = validation_generator.classes
# label name -> class index, and the inverted index -> name mapping.
label2index = validation_generator.class_indices
idx2label = dict((v, k) for k, v in label2index.items())
# Predict over the whole validation set with the fine-tuned VGG19 model.
predictions = model19.predict_generator(
    validation_generator,
    steps=validation_generator.samples/validation_generator.batch_size,
    verbose=1)
predicted_classes = np.argmax(predictions, axis=1)
# Indices of the misclassified images.
errors = np.where(predicted_classes != ground_truth)[0]
print("No of errors = {}/{}".format(len(errors), validation_generator.samples))
# Display each misclassified image with its predicted label and confidence.
for i in range(len(errors)):
    pred_class = np.argmax(predictions[errors[i]])
    pred_label = idx2label[pred_class]
    title = 'Original label:{}, Prediction :{}, confidence : {:.3f}'.format(
        fnames[errors[i]].split('/')[0],
        pred_label,
        predictions[errors[i]][pred_class])
    original = load_img('{}/{}'.format('images/valid', fnames[errors[i]]))
    plt.figure(figsize=[7, 7])
    plt.axis('off')
    plt.title(title)
    plt.imshow(original)
    plt.show()